write_cr3(pagetable_val(ed->arch.monitor_table));
}
-
-static inline void invalidate_shadow_ldt(struct exec_domain *d)
+void invalidate_shadow_ldt(struct exec_domain *d)
{
int i;
unsigned long pfn;
static void process_deferred_ops(unsigned int cpu)
{
unsigned int deferred_ops;
+ struct domain *d = current->domain;
deferred_ops = percpu_info[cpu].deferred_ops;
percpu_info[cpu].deferred_ops = 0;
type = PGT_l1_page_table | PGT_va_mutable;
pin_page:
+ if ( shadow_mode_enabled(FOREIGNDOM) )
+ type = PGT_writable_page;
+
okay = get_page_and_type_from_pagenr(op.mfn, type, FOREIGNDOM);
if ( unlikely(!okay) )
{
case MMUEXT_NEW_BASEPTR:
okay = new_guest_cr3(op.mfn);
+ percpu_info[cpu].deferred_ops &= ~DOP_FLUSH_TLB;
break;
#ifdef __x86_64__
break;
case MMUEXT_INVLPG_LOCAL:
+ if ( shadow_mode_enabled(d) )
+ shadow_invlpg(ed, op.linear_addr);
local_flush_tlb_one(op.linear_addr);
break;
}
pset = vcpuset_to_pcpuset(d, vset);
if ( op.cmd == MMUEXT_TLB_FLUSH_MULTI )
+ {
+ BUG_ON(shadow_mode_enabled(d) && ((pset & d->cpuset) != (1<<cpu)));
flush_tlb_mask(pset & d->cpuset);
+ }
else
+ {
+ BUG_ON(shadow_mode_enabled(d) && ((pset & d->cpuset) != (1<<cpu)));
flush_tlb_one_mask(pset & d->cpuset, op.linear_addr);
+ }
break;
}
case MMUEXT_TLB_FLUSH_ALL:
+ BUG_ON(shadow_mode_enabled(d) && (d->cpuset != (1<<cpu)));
flush_tlb_mask(d->cpuset);
break;
case MMUEXT_INVLPG_ALL:
+ BUG_ON(shadow_mode_enabled(d) && (d->cpuset != (1<<cpu)));
flush_tlb_one_mask(d->cpuset, op.linear_addr);
break;
case MMUEXT_SET_LDT:
{
+ if ( shadow_mode_external(d) )
+ {
+ // reject SET_LDT from an external shadow-mode domain (okay = 0 fails the op)
+ MEM_LOG("ignoring SET_LDT hypercall from external "
+ "domain %u\n", d->id);
+ okay = 0;
+ break;
+ }
+
unsigned long ptr = op.linear_addr;
unsigned long ents = op.nr_ents;
if ( ((ptr & (PAGE_SIZE-1)) != 0) ||
unsigned int foreigndom)
{
mmu_update_t req;
- unsigned long va = 0, pfn, prev_pfn = 0;
+ unsigned long va = 0, mfn, prev_mfn = 0, gpfn;
struct pfn_info *page;
int rc = 0, okay = 1, i = 0, cpu = smp_processor_id();
unsigned int cmd, done = 0;
if ( unlikely(shadow_mode_enabled(d)) )
check_pagetable(ed, "pre-mmu"); /* debug */
- if ( unlikely(shadow_mode_translate(d)) )
- domain_crash_synchronous();
-
if ( unlikely(count & MMU_UPDATE_PREEMPTED) )
{
count &= ~MMU_UPDATE_PREEMPTED;
__mark_dirty(d, mfn);
gpfn = __mfn_to_gpfn(d, mfn);
- ASSERT(gpfn);
+ ASSERT(VALID_M2P(gpfn));
+
if ( page_is_page_table(page) )
shadow_mark_mfn_out_of_sync(ed, gpfn, mfn);
}
if ( unlikely(__put_user(val, &l1_pgentry_val(
linear_pg_table[l1_linear_offset(va)]))) )
- return -EINVAL;
+ {
+ rc = -EINVAL;
+ goto out;
+ }
// also need to update the shadow
if ( shadow_mode_log_dirty(d) )
mark_dirty(d, va_to_l1mfn(ed, va));
+ out:
shadow_unlock(d);
check_pagetable(ed, "post-va"); /* debug */
u32 l2_idx;
struct exec_domain *ed = current;
- // not supported in combination with various shadow modes!
- ASSERT( !shadow_mode_enabled(ed->domain) );
+ if ( unlikely(shadow_mode_enabled(ed->domain)) )
+ return 0;
/*
* Attempt to read the PTE that maps the VA being accessed. By checking for
struct pfn_info *mmfn_info;
struct domain *d = ed->domain;
- ASSERT(!pagetable_val(ed->arch.monitor_table)); /* we should only get called once */
+ ASSERT(pagetable_val(ed->arch.monitor_table) == 0);
mmfn_info = alloc_domheap_page(NULL);
- ASSERT( mmfn_info );
+ ASSERT(mmfn_info != NULL);
mmfn = (unsigned long) (mmfn_info - frame_table);
mpl2e = (l2_pgentry_t *) map_domain_mem(mmfn << PAGE_SHIFT);
shadow_lock(d);
sh_check_name = s;
- SH_VVLOG("%s-PT Audit", s);
+ //SH_VVLOG("%s-PT Audit", s);
sh_l2_present = sh_l1_present = 0;
perfc_incrc(check_pagetable);
unmap_domain_mem(spl2e);
unmap_domain_mem(gpl2e);
+#if 0
SH_VVLOG("PT verified : l2_present = %d, l1_present = %d",
sh_l2_present, sh_l1_present);
+#endif
out:
if ( errors )
#define SHADOW_SNAPSHOT_ELSEWHERE (-1L)
/************************************************************************/
-#define SHADOW_DEBUG 0
-#define SHADOW_VERBOSE_DEBUG 0
-#define SHADOW_VVERBOSE_DEBUG 0
-#define SHADOW_HASH_DEBUG 0
-#define FULLSHADOW_DEBUG 0
+#define SHADOW_DEBUG 1
+#define SHADOW_VERBOSE_DEBUG 1
+#define SHADOW_VVERBOSE_DEBUG 1
+#define SHADOW_HASH_DEBUG 1
+#define FULLSHADOW_DEBUG 1
#if SHADOW_DEBUG
extern int shadow_status_noswap;
if ( need_flush )
{
perfc_incrc(update_hl2e_invlpg);
- __flush_tlb_one(&linear_pg_table[l1_linear_offset(va)]);
+ local_flush_tlb_one(&linear_pg_table[l1_linear_offset(va)]);
}
}
perfc_incrc(shadow_status_hit_head);
}
- SH_VVLOG("lookup gpfn=%p => status=%p", key, head->smfn);
+ //SH_VVLOG("lookup gpfn=%p => status=%p", key, head->smfn);
return head->smfn;
}
}
while ( x != NULL );
- SH_VVLOG("lookup gpfn=%p => status=0", key);
+ //SH_VVLOG("lookup gpfn=%p => status=0", key);
perfc_incrc(shadow_status_miss);
return 0;
}